#include <public/arch-ia64/sioemu.h>
#include <asm/dom_fw.h>
#include <asm/debugger.h>
+#include <asm/sal.h>
+#include <asm/vlsapic.h>
-static REGS *
-sioemu_deliver (void)
+/* Save the guest scratch registers r16..r31 and their NaT bits into
+   the per-vcpu stub save area, so the sioemu stub can freely use that
+   register bank.  Counterpart of sioemu_restore_regs.  */
+static void
+sioemu_save_regs (VCPU *vcpu)
{
- VCPU *vcpu = current;
 REGS *regs = vcpu_regs(vcpu);
- unsigned long psr = vmx_vcpu_get_psr(vcpu);
-
- if (vcpu->vcpu_info->evtchn_upcall_mask)
- panic_domain (NULL, "sioemu_deliver: aleady in stub mode\n");
-
- /* All cleared, but keep BN. */
- vmx_vcpu_set_psr(vcpu, IA64_PSR_MC | (psr & IA64_PSR_BN));
- /* Save registers. */
 vcpu->arch.arch_vmx.stub_saved[0] = regs->r16;
 vcpu->arch.arch_vmx.stub_saved[1] = regs->r17;
 vcpu->arch.arch_vmx.stub_saved[2] = regs->r18;
 vcpu->arch.arch_vmx.stub_saved[15] = regs->r31;
+/* Extract the 16 NaT bits covering the r16..r31 slots from the unat
+   collection.  */
 vcpu->arch.arch_vmx.stub_nats =
 (regs->eml_unat >> IA64_PT_REGS_R16_SLOT) & 0xffff;
+}
+
+/* Restore the guest state saved by sioemu_save_regs together with the
+   context stashed by sioemu_deliver: on entry r28/r29/r30 hold the
+   interrupted iip, psr and ifs.  These must be consumed BEFORE the
+   stub_saved values overwrite r28..r30 below — the statement order is
+   load-bearing.  */
+static void
+sioemu_restore_regs (VCPU *vcpu)
+{
+ REGS *regs = vcpu_regs(vcpu);
+
+ /* First restore registers. */
+ regs->cr_iip = regs->r28;
+ regs->cr_ifs = regs->r30;
+ vmx_vcpu_set_psr (vcpu, regs->r29);
+
+ /* Re-insert the saved NaT bits for r16..r31 into the unat slot.  */
+ regs->eml_unat &= ~(0xffffUL << IA64_PT_REGS_R16_SLOT);
+ regs->eml_unat |= vcpu->arch.arch_vmx.stub_nats << IA64_PT_REGS_R16_SLOT;
+
+ /* Now restore the scratch bank; this clobbers r28..r30, hence the
+    ordering above.  */
+ regs->r16 = vcpu->arch.arch_vmx.stub_saved[0];
+ regs->r17 = vcpu->arch.arch_vmx.stub_saved[1];
+ regs->r18 = vcpu->arch.arch_vmx.stub_saved[2];
+ regs->r19 = vcpu->arch.arch_vmx.stub_saved[3];
+ regs->r20 = vcpu->arch.arch_vmx.stub_saved[4];
+ regs->r21 = vcpu->arch.arch_vmx.stub_saved[5];
+ regs->r22 = vcpu->arch.arch_vmx.stub_saved[6];
+ regs->r23 = vcpu->arch.arch_vmx.stub_saved[7];
+ regs->r24 = vcpu->arch.arch_vmx.stub_saved[8];
+ regs->r25 = vcpu->arch.arch_vmx.stub_saved[9];
+ regs->r26 = vcpu->arch.arch_vmx.stub_saved[10];
+ regs->r27 = vcpu->arch.arch_vmx.stub_saved[11];
+ regs->r28 = vcpu->arch.arch_vmx.stub_saved[12];
+ regs->r29 = vcpu->arch.arch_vmx.stub_saved[13];
+ regs->r30 = vcpu->arch.arch_vmx.stub_saved[14];
+ regs->r31 = vcpu->arch.arch_vmx.stub_saved[15];
+
+}
+
+static REGS *
+sioemu_deliver (void)
+{
+ VCPU *vcpu = current;
+ REGS *regs = vcpu_regs(vcpu);
+ unsigned long psr = vmx_vcpu_get_psr(vcpu);
+
+ if (vcpu->vcpu_info->evtchn_upcall_mask)
+ panic_domain (NULL, "sioemu_deliver: already in stub mode\n");
+
+ /* All cleared, but keep BN. */
+ vmx_vcpu_set_psr(vcpu, IA64_PSR_MC | (psr & IA64_PSR_BN));
+
+ /* Save registers. */
+ sioemu_save_regs (vcpu);
/* Context. */
regs->r28 = regs->cr_iip;
regs->r29 = psr;
regs->r30 = regs->cr_ifs;
- regs->cr_ifs = 0; // pre-cover
+ regs->cr_ifs = 0; // pre-cover
regs->cr_iip = vcpu->arch.event_callback_ip;
regs->eml_unat &= ~(0xffffUL << IA64_PT_REGS_R16_SLOT);
u64 arg3 = regs->r21;
if ((cmd & ~0x1UL) != 0)
- panic_domain (NULL,
- "sioemu_callback_return: bad operation (%lx)\n", cmd);
+ panic_domain (NULL, "sioemu_callback_return: bad operation (%lx)\n",
+ cmd);
/* First restore registers. */
regs->cr_iip = regs->r28;
regs->cr_ifs = regs->r30;
vmx_vcpu_set_psr (vcpu, regs->r29);
- regs->eml_unat &= ~(0xffffUL << IA64_PT_REGS_R16_SLOT);
- regs->eml_unat |= vcpu->arch.arch_vmx.stub_nats << IA64_PT_REGS_R16_SLOT;
-
- regs->r16 = vcpu->arch.arch_vmx.stub_saved[0];
- regs->r17 = vcpu->arch.arch_vmx.stub_saved[1];
- regs->r18 = vcpu->arch.arch_vmx.stub_saved[2];
- regs->r19 = vcpu->arch.arch_vmx.stub_saved[3];
- regs->r20 = vcpu->arch.arch_vmx.stub_saved[4];
- regs->r21 = vcpu->arch.arch_vmx.stub_saved[5];
- regs->r22 = vcpu->arch.arch_vmx.stub_saved[6];
- regs->r23 = vcpu->arch.arch_vmx.stub_saved[7];
- regs->r24 = vcpu->arch.arch_vmx.stub_saved[8];
- regs->r25 = vcpu->arch.arch_vmx.stub_saved[9];
- regs->r26 = vcpu->arch.arch_vmx.stub_saved[10];
- regs->r27 = vcpu->arch.arch_vmx.stub_saved[11];
- regs->r28 = vcpu->arch.arch_vmx.stub_saved[12];
- regs->r29 = vcpu->arch.arch_vmx.stub_saved[13];
- regs->r30 = vcpu->arch.arch_vmx.stub_saved[14];
- regs->r31 = vcpu->arch.arch_vmx.stub_saved[15];
+ sioemu_restore_regs (vcpu);
/* Unmask events. */
vcpu->vcpu_info->evtchn_upcall_mask = 0;
REGS *regs;
regs = sioemu_deliver ();
-
- regs->r16 = 0;
+ regs->r16 = SIOEMU_CB_EVENT;
}
void
REGS *regs;
regs = sioemu_deliver ();
- regs->r16 = 1;
+ regs->r16 = SIOEMU_CB_IO_EMULATE;
regs->r19 = padr;
regs->r20 = data;
regs->r21 = data1;
regs->r22 = word;
}
+/* Enter the sioemu stub with a WAKEUP_VCPU callback: r16 carries the
+   callback reason and r19 the target vcpu id.  */
+void
+sioemu_wakeup_vcpu (int vcpu_id)
+{
+ REGS *regs;
+
+ regs = sioemu_deliver();
+ regs->r16 = SIOEMU_CB_WAKEUP_VCPU;
+ regs->r19 = vcpu_id;
+}
+
+/* Enter the sioemu stub with a SAL_ASSIST callback (a SAL hypercall
+   needs firmware-side help).
+   NOTE(review): parameter v is unused — sioemu_deliver() operates on
+   current; confirm whether v was meant to be checked/used.  */
+void
+sioemu_sal_assist (struct vcpu *v)
+{
+ REGS *regs;
+
+ regs = sioemu_deliver();
+ regs->r16 = SIOEMU_CB_SAL_ASSIST;
+}
+
static int
sioemu_add_io_physmap (struct domain *d, unsigned long start,
unsigned long size, unsigned long type)
// regs->r2, regs->r8, regs->r9);
if (current->vcpu_info->evtchn_upcall_mask == 0)
- panic_domain (NULL, "sioemu_hypercall: not in stub mode\n");
+ panic_domain(NULL, "sioemu_hypercall: not in stub mode\n");
switch (regs->r2 & FW_HYPERCALL_NUM_MASK_LOW)
{
break;
case SIOEMU_HYPERCALL_START_FW:
regs->cr_iip = regs->r8;
- vmx_vcpu_set_psr (current, regs->r9);
+ vmx_vcpu_set_psr(current, regs->r9);
current->vcpu_info->evtchn_upcall_mask = 0;
break;
case SIOEMU_HYPERCALL_ADD_IO_PHYSMAP:
- regs->r8 = sioemu_add_io_physmap (current->domain,
- regs->r8, regs->r9, regs->r10);
+ regs->r8 = sioemu_add_io_physmap(current->domain,
+ regs->r8, regs->r9, regs->r10);
break;
case SIOEMU_HYPERCALL_GET_TIME:
{
uint64_t sec, nsec;
- get_wallclock (&sec, &nsec);
+ get_wallclock(&sec, &nsec);
regs->r8 = (sec << 30) + nsec;
break;
}
+ case SIOEMU_HYPERCALL_GET_REGS:
+ sioemu_restore_regs(current);
+ break;
+ case SIOEMU_HYPERCALL_SET_REGS:
+ sioemu_save_regs(current);
+ break;
+ case SIOEMU_HYPERCALL_FLUSH_CACHE:
+ regs->r8 = ia64_sal_cache_flush(regs->r8);
+ break;
+ case SIOEMU_HYPERCALL_FREQ_BASE:
+ regs->r8 = ia64_sal_freq_base(regs->r8, &regs->r9, &regs->r10);
+ break;
+ case SIOEMU_HYPERCALL_DELIVER_INT:
+ regs->r8 = vlsapic_deliver_int(current->domain,
+ regs->r8, regs->r9, regs->r10);
+ break;
+ case SIOEMU_HYPERCALL_CALLBACK_RETURN:
+ regs->r2 = regs->r27;
+ sioemu_callback_return ();
+ break;
default:
panic_domain (NULL, "bad sioemu hypercall %lx\n", regs->r2);
break;
#ifndef __XEN_PUBLIC_IA64_SIOEMU_H__
#define __XEN_PUBLIC_IA64_SIOEMU_H__
+/* SIOEMU specific hypercalls.
+ The numbers are the minor part of FW_HYPERCALL_SIOEMU. */
+
/* Defines the callback entry point. r8=ip, r9=data.
Must be called per-vcpu. */
#define SIOEMU_HYPERCALL_SET_CALLBACK 0x01
/* Get wallclock time. */
#define SIOEMU_HYPERCALL_GET_TIME 0x04
+/* Get/Set shadow registers. */
+#define SIOEMU_HYPERCALL_GET_REGS 0x05
+#define SIOEMU_HYPERCALL_SET_REGS 0x06
+
+/* Flush cache. */
+#define SIOEMU_HYPERCALL_FLUSH_CACHE 0x07
+
+/* Get freq base. */
+#define SIOEMU_HYPERCALL_FREQ_BASE 0x08
+
+/* Return from callback. */
+#define SIOEMU_HYPERCALL_CALLBACK_RETURN 0x09
+
+/* Deliver an interrupt. */
+#define SIOEMU_HYPERCALL_DELIVER_INT 0x0a
+
+/* SIOEMU callback reason. */
+
+/* An event (from event channel) has to be delivered. */
+#define SIOEMU_CB_EVENT 0x00
+
+/* Emulate an IO access. */
+#define SIOEMU_CB_IO_EMULATE 0x01
+
+/* An IPI is sent to a dead vcpu. */
+#define SIOEMU_CB_WAKEUP_VCPU 0x02
+
+/* A SAL hypercall is executed. */
+#define SIOEMU_CB_SAL_ASSIST 0x03
+
+
+/* SIOEMU firmware mode hypercalls. */
+
/* Return from callback. r16=0.
Unmask vcpu events. */
#define SIOEMU_HYPERPRIVOP_CALLBACK_RETURN 0x01